Multiple critical bugfixes #563
Conversation
Codecov Report
Additional details and impacted files

@@           Coverage Diff           @@
##           master     #563   +/-   ##
=======================================
  Coverage   79.42%   79.42%
=======================================
  Files          48       48
  Lines        3150     3150
=======================================
  Hits         2502     2502
  Misses        648      648

☔ View full report in Codecov by Sentry.
Wrap all generated oneMKL C wrappers in try/catch blocks to propagate errors as return codes instead of crashing. LAPACK functions catch computation_error and return info(), while BLAS/sparse functions catch sycl::exception and return -1. Also add syclQueueWait to the SYCL interop layer.
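To make the convention concrete, here is a minimal, hypothetical Julia-side sketch of how a caller could interpret those return codes. The helper name and error messages are illustrative and not part of this PR:

```julia
# Hypothetical helper: interprets the return-code convention used by the
# generated wrappers. 0 means success; -1 means a sycl::exception was caught;
# any other value is the info() carried by a
# oneapi::mkl::lapack::computation_error.
function check_onemkl_status(info::Integer)
    info == 0 && return nothing
    info == -1 && error("oneMKL call failed with a SYCL exception")
    error("oneMKL LAPACK computation error, info = $info")
end

check_onemkl_status(0)   # returns nothing
# check_onemkl_status(3) # would throw: computation error, info = 3
```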
Work around a Blelloch parallel prefix sum correctness issue on Intel GPUs at block sizes >= 128 by defaulting to a block size of 64.
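A quick usage sketch, assuming a working oneAPI.jl setup with a supported Intel GPU. The 64-element default comes from the `_ACCUMULATE_BLOCK_SIZE` constant in the diff below, and the `block_size` keyword remains available for callers who want a different value:

```julia
using oneAPI  # assumes oneAPI.jl and a supported Intel GPU

A = oneArray(Float32.(1:1000))

# Uses the new default block size of 64, which sidesteps the Blelloch scan
# correctness issue observed at block sizes >= 128 on Intel GPUs.
B = cumsum(A)

# The block size can still be overridden explicitly via the keyword.
C = cumsum(A; block_size = 32)
```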
Widen Int8/Int16/Float16 values to Int32 before writing to local memory to avoid clobbering adjacent bytes, and adjust the local memory size calculation accordingly.
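The pattern is easiest to see in isolation. The following self-contained sketch mirrors the `_widen_type`/`_to_wide`/`_from_wide` helpers from the src/mapreduce.jl diff further down, restricted to the integer cases for illustration:

```julia
_widen_type(::Type{Int8})  = Int32
_widen_type(::Type{Int16}) = Int32
_widen_type(::Type{T}) where {T} = T            # already-wide types pass through

_to_wide(val, ::Type{W}) where {W} = val % W    # widen on store (wraparound convert)
_to_wide(val::T, ::Type{T}) where {T} = val     # no-op when types already match
_from_wide(val, ::Type{T}) where {T} = val % T  # narrow back on load
_from_wide(val::T, ::Type{T}) where {T} = val

W = _widen_type(Int8)             # Int32
stored = _to_wide(Int8(-5), W)    # Int32(-5): occupies a full 4-byte slot
_from_wide(stored, Int8)          # Int8(-5): round-trips exactly
```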
Your PR requires formatting changes to meet the project's style guidelines. The suggested changes:

diff --git a/deps/generate_interfaces.jl b/deps/generate_interfaces.jl
index 108518d..505a71c 100644
--- a/deps/generate_interfaces.jl
+++ b/deps/generate_interfaces.jl
@@ -444,48 +444,48 @@ function generate_cpp(library::String, filename::Vector{String}, output::String;
variant = "column_major::"
end
- # Build catch clause: LAPACK functions also catch computation_error for info
- lapack_catch = "catch (const oneapi::mkl::lapack::computation_error& e) { return e.info(); } catch (const sycl::exception& e) { return -1; }"
- sycl_catch = "catch (const sycl::exception& e) { return -1; }"
+ # Build catch clause: LAPACK functions also catch computation_error for info
+ lapack_catch = "catch (const oneapi::mkl::lapack::computation_error& e) { return e.info(); } catch (const sycl::exception& e) { return -1; }"
+ sycl_catch = "catch (const sycl::exception& e) { return -1; }"
write(oneapi_cpp, "extern \"C\" $header {\n")
if template
type = version_types[version]
- if !occursin("scratchpad_size", name)
- catch_clause = library == "lapack" ? lapack_catch : sycl_catch
- write(oneapi_cpp, " try {\n")
- write(oneapi_cpp, " auto status = oneapi::mkl::$library::$variant$name<$type>($parameters, {});\n")
- write(oneapi_cpp, " device_queue->val.wait_and_throw();\n")
- write(oneapi_cpp, " } $catch_clause\n")
- end
- if occursin("scratchpad_size", name)
- write(oneapi_cpp, " int64_t scratchpad_size = oneapi::mkl::$library::$variant$name<$type>($parameters);\n device_queue->val.wait_and_throw();\n")
- end
+ if !occursin("scratchpad_size", name)
+ catch_clause = library == "lapack" ? lapack_catch : sycl_catch
+ write(oneapi_cpp, " try {\n")
+ write(oneapi_cpp, " auto status = oneapi::mkl::$library::$variant$name<$type>($parameters, {});\n")
+ write(oneapi_cpp, " device_queue->val.wait_and_throw();\n")
+ write(oneapi_cpp, " } $catch_clause\n")
+ end
+ if occursin("scratchpad_size", name)
+ write(oneapi_cpp, " int64_t scratchpad_size = oneapi::mkl::$library::$variant$name<$type>($parameters);\n device_queue->val.wait_and_throw();\n")
+ end
else
if !(name ∈ void_output)
- has_queue = occursin("device_queue", parameters)
- is_scratchpad = occursin("scratchpad_size", name)
- if has_queue && !is_scratchpad
- catch_clause = library == "lapack" ? lapack_catch : sycl_catch
- write(oneapi_cpp, " try {\n")
- write(oneapi_cpp, " auto status = oneapi::mkl::$library::$variant$name($parameters, {});\n")
- write(oneapi_cpp, " device_queue->val.wait_and_throw();\n")
- write(oneapi_cpp, " } $catch_clause\n")
- else
- write(oneapi_cpp, " auto status = oneapi::mkl::$library::$variant$name($parameters, {});\n")
- if has_queue
- write(oneapi_cpp, " device_queue->val.wait_and_throw();\n")
- end
- end
+ has_queue = occursin("device_queue", parameters)
+ is_scratchpad = occursin("scratchpad_size", name)
+ if has_queue && !is_scratchpad
+ catch_clause = library == "lapack" ? lapack_catch : sycl_catch
+ write(oneapi_cpp, " try {\n")
+ write(oneapi_cpp, " auto status = oneapi::mkl::$library::$variant$name($parameters, {});\n")
+ write(oneapi_cpp, " device_queue->val.wait_and_throw();\n")
+ write(oneapi_cpp, " } $catch_clause\n")
+ else
+ write(oneapi_cpp, " auto status = oneapi::mkl::$library::$variant$name($parameters, {});\n")
+ if has_queue
+ write(oneapi_cpp, " device_queue->val.wait_and_throw();\n")
+ end
+ end
else
- if occursin("device_queue", parameters)
- write(oneapi_cpp, " try {\n")
- write(oneapi_cpp, " oneapi::mkl::$library::$variant$name($parameters);\n")
- write(oneapi_cpp, " device_queue->val.wait_and_throw();\n")
- write(oneapi_cpp, " } $sycl_catch\n")
- else
- write(oneapi_cpp, " oneapi::mkl::$library::$variant$name($parameters);\n")
- end
+ if occursin("device_queue", parameters)
+ write(oneapi_cpp, " try {\n")
+ write(oneapi_cpp, " oneapi::mkl::$library::$variant$name($parameters);\n")
+ write(oneapi_cpp, " device_queue->val.wait_and_throw();\n")
+ write(oneapi_cpp, " } $sycl_catch\n")
+ else
+ write(oneapi_cpp, " oneapi::mkl::$library::$variant$name($parameters);\n")
+ end
end
end
if occursin("scratchpad_size", name)
diff --git a/lib/support/liboneapi_support.jl b/lib/support/liboneapi_support.jl
index 91146c9..1b5d754 100644
--- a/lib/support/liboneapi_support.jl
+++ b/lib/support/liboneapi_support.jl
@@ -59,7 +59,7 @@ function syclQueueDestroy(obj)
end
function syclQueueWait(obj)
- @ccall liboneapi_support.syclQueueWait(obj::syclQueue_t)::Cint
+ return @ccall liboneapi_support.syclQueueWait(obj::syclQueue_t)::Cint
end
mutable struct syclEvent_st end
diff --git a/src/accumulate.jl b/src/accumulate.jl
index d3647fa..769928d 100644
--- a/src/accumulate.jl
+++ b/src/accumulate.jl
@@ -7,12 +7,16 @@ import AcceleratedKernels as AK
const _ACCUMULATE_BLOCK_SIZE = 64
# Accumulate operations using AcceleratedKernels
-Base.accumulate!(op, B::oneArray, A::oneArray; init = zero(eltype(A)),
- block_size = _ACCUMULATE_BLOCK_SIZE, kwargs...) =
+Base.accumulate!(
+ op, B::oneArray, A::oneArray; init = zero(eltype(A)),
+ block_size = _ACCUMULATE_BLOCK_SIZE, kwargs...
+) =
AK.accumulate!(op, B, A, oneAPIBackend(); init, block_size, kwargs...)
-Base.accumulate(op, A::oneArray; init = zero(eltype(A)),
- block_size = _ACCUMULATE_BLOCK_SIZE, kwargs...) =
+Base.accumulate(
+ op, A::oneArray; init = zero(eltype(A)),
+ block_size = _ACCUMULATE_BLOCK_SIZE, kwargs...
+) =
AK.accumulate(op, A, oneAPIBackend(); init, block_size, kwargs...)
Base.cumsum(src::oneArray; block_size = _ACCUMULATE_BLOCK_SIZE, kwargs...) =
diff --git a/src/mapreduce.jl b/src/mapreduce.jl
index 822b9b1..8374200 100644
--- a/src/mapreduce.jl
+++ b/src/mapreduce.jl
@@ -12,13 +12,13 @@
@inline _widen_type(::Type{UInt8}) = Int32
@inline _widen_type(::Type{Int16}) = Int32
@inline _widen_type(::Type{UInt16}) = Int32
-@inline _widen_type(::Type{T}) where T = T
+@inline _widen_type(::Type{T}) where {T} = T
# Dispatch-based conversions so the compiler never generates `%` for non-integer types
-@inline _to_wide(val, ::Type{W}) where W = val % W
-@inline _to_wide(val::T, ::Type{T}) where T = val
-@inline _from_wide(val, ::Type{T}) where T = val % T
-@inline _from_wide(val::T, ::Type{T}) where T = val
+@inline _to_wide(val, ::Type{W}) where {W} = val % W
+@inline _to_wide(val::T, ::Type{T}) where {T} = val
+@inline _from_wide(val, ::Type{T}) where {T} = val % T
+@inline _from_wide(val::T, ::Type{T}) where {T} = val
# Reduce a value across a group, using local memory for communication
@inline function reduce_group(op, val::T, neutral, ::Val{maxitems}) where {T, maxitems}
@@ -37,7 +37,7 @@
index = 2 * d * (item-1) + 1
@inbounds if index <= items
other_val = if index + d <= items
- _from_wide(shared[index+d], T)
+ _from_wide(shared[index + d], T)
else
neutral
end